Add per-exec_domain event-channel initialization: each exec_domain reserves its own "misdirect VIRQ" port at (eid * EVENT_CHANNELS_SPREAD), replacing the single domain-wide port 0.
Make sched_rem_domain() and the scheduler rem_task hook operate on an individual exec_domain rather than a whole domain; callers iterate the domain's exec_domains.
void domain_kill(struct domain *d)
{
+ struct exec_domain *ed;
+
domain_pause(d);
if ( !test_and_set_bit(DF_DYING, &d->d_flags) )
{
- sched_rem_domain(d);
+ for_each_exec_domain(d, ed)
+ sched_rem_domain(ed);
domain_relinquish_memory(d);
put_domain(d);
}
atomic_set(&ed->pausecnt, 0);
shadow_lock_init(ed);
+ if ( (rc = init_exec_domain_event_channels(ed)) != 0 )
+ goto out;
+
memcpy(&ed->thread, &idle0_exec_domain.thread, sizeof(ed->thread));
/* arch_do_createdomain */
sched_add_domain(ed);
- if ( (rc = arch_final_setup_guestos(ed, c)) != 0 )
+ if ( (rc = arch_final_setup_guestos(ed, c)) != 0 ) {
+ sched_rem_domain(ed);
goto out;
+ }
/* Set up the shared info structure. */
update_dom_time(d);
{
if ( max == MAX_EVENT_CHANNELS )
return -ENOSPC;
-
- max = port + EVENT_CHANNELS_SPREAD;
+
+ if ( port == 0 )
+ max = INIT_EVENT_CHANNELS;
+ else
+ max = port + EVENT_CHANNELS_SPREAD;
chn = xmalloc(max * sizeof(event_channel_t));
if ( unlikely(chn == NULL) )
* bound yet. The exception is the 'misdirect VIRQ', which is permanently
* bound to port 0.
*/
- if ( ((port = ed->virq_to_evtchn[virq]) != 0) ||
+ if ( ((port = ed->virq_to_evtchn[virq]) !=
+ (ed->eid * EVENT_CHANNELS_SPREAD)) ||
(virq == VIRQ_MISDIRECT) ||
((port = get_free_port(ed)) < 0) )
goto out;
chn1 = d1->event_channel;
/* NB. Port 0 is special (VIRQ_MISDIRECT). Never let it be closed. */
- if ( (port1 <= 0) || (port1 >= d1->max_event_channel) )
+ if ( (port1 <= 0) || (port1 >= d1->max_event_channel) ||
+ ((port1 & (EVENT_CHANNELS_SPREAD - 1)) == 0) )
{
rc = -EINVAL;
goto out;
}
+/*
+ * Reserve this exec_domain's private "misdirect VIRQ" event channel.
+ * Each exec_domain owns the fixed port (eid * EVENT_CHANNELS_SPREAD);
+ * it is claimed here, marked ECS_VIRQ/VIRQ_MISDIRECT, and every slot of
+ * virq_to_evtchn[] is pointed at it, so a VIRQ still equal to this port
+ * later reads as "not yet bound".
+ * Returns 0 on success, -EINVAL if the expected port is already in use
+ * or could not be allocated.
+ * NOTE(review): a get_free_port() failure (e.g. -ENOMEM) is also folded
+ * into -EINVAL here -- consider propagating the real error.
+ */
+int init_exec_domain_event_channels(struct exec_domain *ed)
+{
+ struct domain *d = ed->domain;
+ int port, ret = -EINVAL, virq;
+
+ spin_lock(&d->event_channel_lock);
+ port = ed->eid * EVENT_CHANNELS_SPREAD;
+ /* Fail if our reserved port is already taken, or if growing the    */
+ /* table via get_free_port() hands back anything other than it.     */
+ if ( ((port < d->max_event_channel &&
+ d->event_channel[port].state != ECS_FREE)) ||
+ (get_free_port(ed) != port) )
+ goto out;
+ d->event_channel[port].state = ECS_VIRQ;
+ d->event_channel[port].u.virq = VIRQ_MISDIRECT;
+ /* Point every VIRQ at the misdirect port to mark it "unbound".     */
+ for ( virq = 0; virq < NR_VIRQS; virq++ )
+ ed->virq_to_evtchn[virq] = port;
+ ret = 0;
+ out:
+ spin_unlock(&d->event_channel_lock);
+ return ret;
+}
+
int init_event_channels(struct domain *d)
{
 spin_lock_init(&d->event_channel_lock);
- d->event_channel = xmalloc(INIT_EVENT_CHANNELS * sizeof(event_channel_t));
- if ( unlikely(d->event_channel == NULL) )
- return -ENOMEM;
- d->max_event_channel = INIT_EVENT_CHANNELS;
- memset(d->event_channel, 0, INIT_EVENT_CHANNELS * sizeof(event_channel_t));
- d->event_channel[0].state = ECS_VIRQ;
- d->event_channel[0].u.virq = VIRQ_MISDIRECT;
- return 0;
+ /* Table allocation and misdirect-port setup now happen in          */
+ /* init_exec_domain_event_channels(); get_free_port() grows the     */
+ /* table on demand, so no up-front xmalloc is needed here.  Only    */
+ /* exec_domain[0] exists at this point -- later VCPUs call the      */
+ /* per-exec_domain initializer themselves.                          */
+ return init_exec_domain_event_channels(d->exec_domain[0]);
}
TRACE_2D(TRC_SCHED_DOM_ADD, d->id, ed);
}
-void sched_rem_domain(struct domain *d)
+/* Remove a single exec_domain (VCPU) from the scheduler: cancel its   */
+/* accounting timer and invoke the per-scheduler rem_task hook.        */
+/* Callers that previously passed a whole domain must now iterate its  */
+/* exec_domains (see domain_kill()).                                   */
+void sched_rem_domain(struct exec_domain *ed)
{
- struct exec_domain *ed;
- for_each_exec_domain(d, ed)
- rem_ac_timer(&ed->timer);
- SCHED_OP(rem_task, d);
- TRACE_2D(TRC_SCHED_DOM_REM, d->id, d);
+ rem_ac_timer(&ed->timer);
+ SCHED_OP(rem_task, ed);
+ /* Trace now records both the domain id and the exec_domain id.     */
+ TRACE_3D(TRC_SCHED_DOM_REM, ed->domain->id, ed->eid, ed);
}
void init_idle_task(void)
int (*alloc_task) (struct exec_domain *);
void (*add_task) (struct exec_domain *);
void (*free_task) (struct domain *);
- void (*rem_task) (struct domain *);
+ void (*rem_task) (struct exec_domain *);
void (*sleep) (struct exec_domain *);
void (*wake) (struct exec_domain *);
void (*do_block) (struct exec_domain *);
int init_event_channels(struct domain *d);
void destroy_event_channels(struct domain *d);
+int init_exec_domain_event_channels(struct exec_domain *ed);
struct exec_domain
{
#define set_current_state(_s) do { current->state = (_s); } while (0)
void scheduler_init(void);
void schedulers_start(void);
-void sched_add_domain(struct exec_domain *d);
-void sched_rem_domain(struct domain *d);
+void sched_add_domain(struct exec_domain *);
+void sched_rem_domain(struct exec_domain *);
long sched_ctl(struct sched_ctl_cmd *);
long sched_adjdom(struct sched_adjdom_cmd *);
int sched_id();